From: Keir Fraser Date: Sat, 27 Jun 2009 09:33:33 +0000 (+0100) Subject: x86 svm: Make 32bit legacy guests boot again X-Git-Tag: archive/raspbian/4.8.0-1+rpi1~1^2~13677^2~4 X-Git-Url: https://dgit.raspbian.org/%22http:/www.example.com/cgi/%22https:/%22bookmarks://%22Dat/%22http:/www.example.com/cgi/%22https:/%22bookmarks:/%22Dat?a=commitdiff_plain;h=d2c06ef63ff2df915e7f582da53ed6a51b14f028;p=xen.git x86 svm: Make 32bit legacy guests boot again Attached patch fixes a bug introduced in c/s 19648. 32bit legacy guests have the sysenter/sysexit instructions available. Therefore, we have to disable intercepts for the sysenter MSRs, or the guest gets stuck in an infinite loop of #GPs. For guests in 64bit mode and 32bit compat mode, sysenter/sysexit instructions aren't available. The sysenter MSRs have to be intercepted to make the instruction emulation work. Signed-off-by: Christoph Egger Signed-off-by: Keir Fraser --- diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c index e24e0bdd9a..a47c083458 100644 --- a/xen/arch/x86/hvm/svm/svm.c +++ b/xen/arch/x86/hvm/svm/svm.c @@ -452,10 +452,19 @@ static void svm_update_guest_cr(struct vcpu *v, unsigned int cr) static void svm_update_guest_efer(struct vcpu *v) { struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; + bool_t lma = v->arch.hvm_vcpu.guest_efer & EFER_LMA; vmcb->efer = (v->arch.hvm_vcpu.guest_efer | EFER_SVME) & ~EFER_LME; - if ( vmcb->efer & EFER_LMA ) + if ( lma ) vmcb->efer |= EFER_LME; + + /* + * In legacy mode (EFER.LMA=0) we natively support SYSENTER/SYSEXIT with + * no need for MSR intercepts. When EFER.LMA=1 we must trap and emulate. 
+ */ + svm_intercept_msr(v, MSR_IA32_SYSENTER_CS, lma); + svm_intercept_msr(v, MSR_IA32_SYSENTER_ESP, lma); + svm_intercept_msr(v, MSR_IA32_SYSENTER_EIP, lma); } static void svm_flush_guest_tlbs(void) diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c index 9cc14d1a03..d1b00093b8 100644 --- a/xen/arch/x86/hvm/svm/vmcb.c +++ b/xen/arch/x86/hvm/svm/vmcb.c @@ -78,29 +78,34 @@ struct host_save_area *alloc_host_save_area(void) return hsa; } -void svm_disable_intercept_for_msr(struct vcpu *v, u32 msr) +void svm_intercept_msr(struct vcpu *v, uint32_t msr, int enable) { unsigned long *msr_bitmap = v->arch.hvm_svm.msrpm; + unsigned long *msr_bit = NULL; /* * See AMD64 Programmers Manual, Vol 2, Section 15.10 (MSR-Bitmap Address). */ if ( msr <= 0x1fff ) - { - __clear_bit(msr*2, msr_bitmap + 0x000/BYTES_PER_LONG); - __clear_bit(msr*2+1, msr_bitmap + 0x000/BYTES_PER_LONG); - } + msr_bit = msr_bitmap + 0x0000 / BYTES_PER_LONG; else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) ) + msr_bit = msr_bitmap + 0x0800 / BYTES_PER_LONG; + else if ( (msr >= 0xc0010000) && (msr <= 0xc0011fff) ) + msr_bit = msr_bitmap + 0x1000 / BYTES_PER_LONG; + + BUG_ON(msr_bit == NULL); + + msr &= 0x1fff; + + if ( enable ) { - msr &= 0x1fff; - __clear_bit(msr*2, msr_bitmap + 0x800/BYTES_PER_LONG); - __clear_bit(msr*2+1, msr_bitmap + 0x800/BYTES_PER_LONG); - } - else if ( (msr >= 0xc001000) && (msr <= 0xc0011fff) ) + __set_bit(msr * 2, msr_bit); + __set_bit(msr * 2 + 1, msr_bit); + } + else { - msr &= 0x1fff; - __clear_bit(msr*2, msr_bitmap + 0x1000/BYTES_PER_LONG); - __clear_bit(msr*2+1, msr_bitmap + 0x1000/BYTES_PER_LONG); + __clear_bit(msr * 2, msr_bit); + __clear_bit(msr * 2 + 1, msr_bit); } } @@ -165,8 +170,9 @@ static int construct_vmcb(struct vcpu *v) if ( opt_softtsc ) vmcb->general1_intercepts |= GENERAL1_INTERCEPT_RDTSC; - /* Guest EFER: *must* contain SVME or VMRUN will fail. */ - vmcb->efer = EFER_SVME; + /* Guest EFER. 
*/ + v->arch.hvm_vcpu.guest_efer = 0; + hvm_update_guest_efer(v); /* Guest segment limits. */ vmcb->cs.limit = ~0u; diff --git a/xen/include/asm-x86/hvm/svm/vmcb.h b/xen/include/asm-x86/hvm/svm/vmcb.h index d6da23584b..f9dc6440d2 100644 --- a/xen/include/asm-x86/hvm/svm/vmcb.h +++ b/xen/include/asm-x86/hvm/svm/vmcb.h @@ -481,7 +481,9 @@ void svm_destroy_vmcb(struct vcpu *v); void setup_vmcb_dump(void); -void svm_disable_intercept_for_msr(struct vcpu *v, u32 msr); +void svm_intercept_msr(struct vcpu *v, uint32_t msr, int enable); +#define svm_disable_intercept_for_msr(v, msr) svm_intercept_msr((v), (msr), 0) +#define svm_enable_intercept_for_msr(v, msr) svm_intercept_msr((v), (msr), 1) #endif /* ASM_X86_HVM_SVM_VMCS_H__ */